From 8a981e0bf25e64ba0705e9ce6511a32604078be4 Mon Sep 17 00:00:00 2001
From: Jeremy Fitzhardinge
Date: Thu, 24 May 2007 10:45:03 +0100
Subject: [PATCH] Make map_domain_page_global fail

When the global mapping cache runs out, make map_domain_page_global
return NULL on failure rather than fire an assertion failure.  This
also updates the callers to handle the error gracefully.

The only exception to this is the shadow pagetable code, which uses
map_domain_page_global to create a mapping for
v->arch.paging.shadow.guest_vtable; it's not clear this needs to be a
global mapping anyway.

Signed-off-by: Jeremy Fitzhardinge
---
 xen/arch/x86/hvm/vlapic.c         |  9 ++++++++-
 xen/arch/x86/mm/shadow/multi.c    |  5 +++++
 xen/arch/x86/x86_32/domain_page.c | 16 ++++++++++++----
 3 files changed, 25 insertions(+), 5 deletions(-)

diff --git a/xen/arch/x86/hvm/vlapic.c b/xen/arch/x86/hvm/vlapic.c
index 8e1dbd311d..e9d6330f31 100644
--- a/xen/arch/x86/hvm/vlapic.c
+++ b/xen/arch/x86/hvm/vlapic.c
@@ -918,12 +918,19 @@ int vlapic_init(struct vcpu *v)
     vlapic->regs_page = alloc_domheap_page(NULL);
     if ( vlapic->regs_page == NULL )
     {
-        dprintk(XENLOG_ERR, "malloc vlapic regs error for vcpu %x\n",
+        dprintk(XENLOG_ERR, "malloc vlapic regs_page error for vcpu %x\n",
                 v->vcpu_id);
         return -ENOMEM;
     }
 
     vlapic->regs = map_domain_page_global(page_to_mfn(vlapic->regs_page));
+    if ( vlapic->regs == NULL )
+    {
+        dprintk(XENLOG_ERR, "malloc vlapic regs error for vcpu %x\n",
+                v->vcpu_id);
+        return -ENOMEM;
+    }
+
     memset(vlapic->regs, 0, PAGE_SIZE);
 
     vlapic_reset(vlapic);
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index f55382ac29..57e25c829f 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -3485,6 +3485,8 @@ sh_update_cr3(struct vcpu *v, int do_locking)
         if ( v->arch.paging.shadow.guest_vtable )
             sh_unmap_domain_page_global(v->arch.paging.shadow.guest_vtable);
         v->arch.paging.shadow.guest_vtable = sh_map_domain_page_global(gmfn);
+        /* PAGING_LEVELS==4 implies 64-bit, which means that
+         * map_domain_page_global can't fail */
     }
     else
         v->arch.paging.shadow.guest_vtable = __linear_l4_table;
@@ -3515,6 +3517,9 @@ sh_update_cr3(struct vcpu *v, int do_locking)
         if ( v->arch.paging.shadow.guest_vtable )
             sh_unmap_domain_page_global(v->arch.paging.shadow.guest_vtable);
         v->arch.paging.shadow.guest_vtable = sh_map_domain_page_global(gmfn);
+        /* Does this really need map_domain_page_global? Handle the
+         * error properly if so. */
+        ASSERT( v->arch.paging.shadow.guest_vtable );
     }
     else
         v->arch.paging.shadow.guest_vtable = __linear_l2_table;
diff --git a/xen/arch/x86/x86_32/domain_page.c b/xen/arch/x86/x86_32/domain_page.c
index 551ce50593..59c129ee13 100644
--- a/xen/arch/x86/x86_32/domain_page.c
+++ b/xen/arch/x86/x86_32/domain_page.c
@@ -218,17 +218,25 @@ void *map_domain_page_global(unsigned long mfn)
 
         idx = find_first_zero_bit(inuse, GLOBALMAP_BITS);
         va = IOREMAP_VIRT_START + (idx << PAGE_SHIFT);
-        ASSERT(va < FIXADDR_START);
+        if ( va >= FIXADDR_START )
+        {
+            va = 0;
+            goto fail;
+        }
     }
 
     set_bit(idx, inuse);
     inuse_cursor = idx + 1;
 
+  fail:
     spin_unlock(&globalmap_lock);
 
-    pl2e = virt_to_xen_l2e(va);
-    pl1e = l2e_to_l1e(*pl2e) + l1_table_offset(va);
-    l1e_write(pl1e, l1e_from_pfn(mfn, __PAGE_HYPERVISOR));
+    if ( likely(va != 0) )
+    {
+        pl2e = virt_to_xen_l2e(va);
+        pl1e = l2e_to_l1e(*pl2e) + l1_table_offset(va);
+        l1e_write(pl1e, l1e_from_pfn(mfn, __PAGE_HYPERVISOR));
+    }
 
     return (void *)va;
 }
-- 
2.30.2